[LINUX][X86_64] Fix initial memory mapping code.
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 1 Jun 2006 18:07:40 +0000 (19:07 +0100)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 1 Jun 2006 18:07:40 +0000 (19:07 +0100)
The temporary mappings needed to set up the 1:1 mappings must be torn down after use; otherwise they may trigger the
WARN_ON() in vmap_pte_range() (namely if the chunk allocated to hold the kernel and initial page tables gets close to or
exceeds 128MB, or if a sufficiently high mem= argument causes the static allocations to grow beyond 128MB, which in
either case means these mappings extend into the modules area).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c

index 1de014d05b33e210c2d94ddca912c77cf9baf1fb..0745c66b08a9e353b7b43c09392533ece8f752a5 100644 (file)
@@ -56,6 +56,8 @@
 struct dma_mapping_ops* dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
+int after_bootmem;
+
 extern unsigned long *contiguous_bitmap;
 
 static unsigned long dma_reserve __initdata;
@@ -74,7 +76,7 @@ extern unsigned long start_pfn;
        (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +   \
        __START_KERNEL_map)))
 
-static void early_make_page_readonly(void *va, unsigned int feature)
+static void __meminit early_make_page_readonly(void *va, unsigned int feature)
 {
        unsigned long addr, _va = (unsigned long)va;
        pte_t pte, *ptep;
@@ -83,6 +85,11 @@ static void early_make_page_readonly(void *va, unsigned int feature)
        if (xen_feature(feature))
                return;
 
+       if (after_bootmem) {
+               make_page_readonly(va, feature);
+               return;
+       }
+
        addr = (unsigned long) page[pgd_index(_va)];
        addr_to_page(addr, page);
 
@@ -198,10 +205,6 @@ void show_mem(void)
        printk(KERN_INFO "%lu pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
-int after_bootmem;
-
 static void *spp_getpage(void)
 { 
        void *ptr;
@@ -448,9 +451,9 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
                pte = alloc_static_page(&pte_phys);
                pte_save = pte;
                for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
-                       if ((address >= end) ||
-                           ((address >> PAGE_SHIFT) >=
-                            xen_start_info->nr_pages)) { 
+                       if (address >= (after_bootmem
+                                       ? end
+                                       : xen_start_info->nr_pages << PAGE_SHIFT)) {
                                __set_pte(pte, __pte(0)); 
                                continue;
                        }
@@ -550,7 +553,7 @@ void __init xen_init_pt(void)
                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
 }
 
-void __init extend_init_mapping(unsigned long tables_space)
+static void __init extend_init_mapping(unsigned long tables_space)
 {
        unsigned long va = __START_KERNEL_map;
        unsigned long phys, addr, *pte_page;
@@ -666,7 +669,18 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
                        set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
        }
 
-       BUG_ON(!after_bootmem && start_pfn != table_end);
+       if (!after_bootmem) {
+               BUG_ON(start_pfn != table_end);
+
+               /* Destroy the temporary mappings created above. */
+               start = __START_KERNEL_map + (table_start << PAGE_SHIFT);
+               end = start + tables_space;
+               for (; start < end; start += PAGE_SIZE) {
+                       /* Should also clear out and reclaim any page table
+                          pages no longer needed... */
+                       WARN_ON(HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0));
+               }
+       }
 
        __flush_tlb_all();
 }